static void svm_update_guest_efer(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ bool_t lma = v->arch.hvm_vcpu.guest_efer & EFER_LMA;
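+ /*
+ * EFER.LME is masked out of the VMCB value below and only restored once
+ * the guest is actually running in long mode (EFER.LMA set).
+ */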
vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
- if ( vmcb->efer & EFER_LMA )
+ if ( lma )
vmcb->efer |= EFER_LME;
+
+ /*
+ * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
+ * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate.
+ */
+ svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
+ svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
+ svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
}
static void svm_flush_guest_tlbs(void)
return hsa;
}
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable)
{
unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
+ unsigned long *msr_bit = NULL;
/*
* See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
*/
if ( msr <= 0x1fff )
- {
- __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG);
- __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG);
- }
+ msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG;
else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+ msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG;
+ else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
+ msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG;
+
+ BUG_ON(msr_bit == NULL);
+
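+ /*
+ * Each MSR occupies two consecutive bits within its 2KiB block: the lower
+ * bit intercepts reads, the upper bit intercepts writes. Reduce the MSR
+ * number to its index within the block before setting/clearing both bits.
+ */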
+ msr &= 0x1fff;
+
+ if ( enable )
{
- msr &= 0x1fff;
- __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
- __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
- }
- else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) )
+ __set_bit(msr * 2, msr_bit);
+ __set_bit(msr * 2 + 1, msr_bit);
+ }
+ else
{
- msr &= 0x1fff;
- __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
- __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
+ __clear_bit(msr * 2, msr_bit);
+ __clear_bit(msr * 2 + 1, msr_bit);
}
}
if ( opt_softtsc )
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
- /* Guest EFER: *must* contain SVME or VMRUN will fail. */
- vmcb->efer = EFER_SVME;
+ /* Guest EFER. */
+ v->arch.hvm_vcpu.guest_efer = 0;
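+ /*
+ * hvm_update_guest_efer() ends up in svm_update_guest_efer(), which ORs
+ * EFER_SVME into the VMCB value as VMRUN requires.
+ */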
+ hvm_update_guest_efer(v);
/* Guest segment limits. */
vmcb->cs.limit = ~0u;
void setup_vmcb_dump(void);
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
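+/* Convenience wrappers: enable or disable both read and write intercepts. */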
+#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 0)
+#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 1)
#endif /* ASM_X86_HVM_SVM_VMCS_H__ */